bitkeeper revision 1.1375 (426a2efcnNwCi2J-ptewqWF_62U1-g)
author: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Sat, 23 Apr 2005 11:18:20 +0000 (11:18 +0000)
committer: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Sat, 23 Apr 2005 11:18:20 +0000 (11:18 +0000)
xenpmap.h, hypervisor.h, xen_machdep.c, pmap.c, machdep.c:
  Writable pagetables for freebsd.

freebsd-5.3-xen-sparse/i386-xen/i386-xen/machdep.c
freebsd-5.3-xen-sparse/i386-xen/i386-xen/pmap.c
freebsd-5.3-xen-sparse/i386-xen/i386-xen/xen_machdep.c
freebsd-5.3-xen-sparse/i386-xen/include/hypervisor.h
freebsd-5.3-xen-sparse/i386-xen/include/xenpmap.h

index f954962f1ea425b17228a0f9f520934248673fc4..b61d9ceeb4ee3a376be4e833c772d53829e6bee5 100644 (file)
@@ -1374,7 +1374,6 @@ extern unsigned long cpu0prvpage;
 extern unsigned long *SMPpt;
 pteinfo_t *pteinfo_list;
 unsigned long *xen_machine_phys = ((unsigned long *)VADDR(1008, 0));
-pt_entry_t *KPTphysv;
 int preemptable;
 int gdt_set;
 
@@ -1386,8 +1385,9 @@ void
 initvalues(start_info_t *startinfo)
 { 
     int i;
+    vm_paddr_t pdir_shadow_ma, KPTphys;
 #ifdef WRITABLE_PAGETABLES
-    XENPRINTF("using writable pagetables\n");
+    printk("using writable pagetables\n");
     HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
 #endif
 
@@ -1398,18 +1398,17 @@ initvalues(start_info_t *startinfo)
     /* pre-zero unused mapped pages */
     bzero((char *)(KERNBASE + (tmpindex << PAGE_SHIFT)), (1024 - tmpindex)*PAGE_SIZE); 
     IdlePTD = (pd_entry_t *)xpmap_ptom(__pa(startinfo->pt_base));
-    KPTphysv = (pt_entry_t *)(startinfo->pt_base + PAGE_SIZE);
+    KPTphys = xpmap_ptom(__pa(startinfo->pt_base + PAGE_SIZE));
     XENPRINTF("IdlePTD %p\n", IdlePTD);
     XENPRINTF("nr_pages: %ld shared_info: 0x%lx flags: 0x%lx pt_base: 0x%lx "
              "mod_start: 0x%lx mod_len: 0x%lx\n",
              xen_start_info->nr_pages, xen_start_info->shared_info, 
              xen_start_info->flags, xen_start_info->pt_base, 
              xen_start_info->mod_start, xen_start_info->mod_len);
-    
-    /* setup self-referential mapping first so vtomach will work */
-    xen_queue_pt_update(IdlePTD + PTDPTDI , (unsigned long)IdlePTD | 
-                       PG_V | PG_A);
-    xen_flush_queue();
+
+
+
+
     /* Map proc0's UPAGES */
     proc0uarea = (struct user *)(KERNBASE + (tmpindex << PAGE_SHIFT));
     tmpindex += UAREA_PAGES;
@@ -1439,9 +1438,11 @@ initvalues(start_info_t *startinfo)
     /* map SMP page table RO */
     PT_SET_MA(SMPpt, vtomach(SMPpt) & ~PG_RW);
 
-    /* put the page table into the pde */
-    xen_queue_pt_update(IdlePTD + MPPTDI, xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_M | PG_RW | PG_V | PG_A);
-
+    /* put the page table into the page directory */
+    xen_queue_pt_update((vm_paddr_t)(IdlePTD + MPPTDI), 
+                       xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_M | PG_RW | PG_V | PG_A);
+    xen_queue_pt_update(pdir_shadow_ma + MPPTDI*sizeof(vm_paddr_t), 
+                       xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_V | PG_A);
     tmpindex++;
 #endif
 
@@ -1454,16 +1455,34 @@ initvalues(start_info_t *startinfo)
 #endif
     /* unmap remaining pages from initial 4MB chunk */
     for (i = tmpindex; i%1024 != 0; i++) 
-       PT_CLEAR_VA(KPTphysv + i, TRUE);
+       xen_queue_pt_update(KPTphys + i*sizeof(vm_paddr_t), 0);
+    xen_flush_queue();
+    
+    pdir_shadow_ma = xpmap_ptom(tmpindex << PAGE_SHIFT);
+    tmpindex++;
+
+    /* setup shadow mapping first so vtomach will work */
+    xen_pt_pin((vm_paddr_t)pdir_shadow_ma);
+    xen_queue_pt_update((vm_paddr_t)(IdlePTD + PTDPTDI), 
+                       pdir_shadow_ma | PG_V | PG_A | PG_RW | PG_M);
+    xen_queue_pt_update(pdir_shadow_ma + PTDPTDI*sizeof(vm_paddr_t), 
+                       ((vm_paddr_t)IdlePTD) | PG_V | PG_A);
+    xen_queue_pt_update(pdir_shadow_ma + KPTDI*sizeof(vm_paddr_t), 
+                       KPTphys | PG_V | PG_A);
 
     /* allocate remainder of NKPT pages */
-    for (i = 0; i < NKPT-1; i++, tmpindex++)
-       PD_SET_VA(((unsigned long *)startinfo->pt_base) + KPTDI + i + 1, (tmpindex << PAGE_SHIFT)| PG_M | PG_RW | PG_V | PG_A, TRUE);
+    for (i = 0; i < NKPT-1; i++, tmpindex++) {
+       xen_queue_pt_update((vm_paddr_t)(IdlePTD + KPTDI + i + 1), 
+                           xpmap_ptom((tmpindex << PAGE_SHIFT)| PG_M | PG_RW | PG_V | PG_A));
+       xen_queue_pt_update(pdir_shadow_ma + (KPTDI + i + 1)*sizeof(vm_paddr_t), 
+                           xpmap_ptom((tmpindex << PAGE_SHIFT)| PG_V | PG_A));
+    }
     tmpindex += NKPT-1;
     PT_UPDATES_FLUSH();
 
     HYPERVISOR_shared_info = (shared_info_t *)(KERNBASE + (tmpindex << PAGE_SHIFT));
-    PT_SET_MA(HYPERVISOR_shared_info, xen_start_info->shared_info | PG_A | PG_V | PG_RW | PG_M);
+    PT_SET_MA(HYPERVISOR_shared_info, 
+             xen_start_info->shared_info | PG_A | PG_V | PG_RW | PG_M);
     tmpindex++;
 
     HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list = (unsigned long)xen_phys_machine;
@@ -1572,10 +1591,9 @@ init386(void)
 
        PT_SET_MA(gdt, *vtopte((unsigned long)gdt) & ~PG_RW);
        gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
-       if (HYPERVISOR_set_gdt(&gdtmachpfn, LAST_RESERVED_GDT_ENTRY + 1)) {
-           XENPRINTF("set_gdt failed\n");
-
-       }
+       if ((error = HYPERVISOR_set_gdt(&gdtmachpfn, LAST_RESERVED_GDT_ENTRY + 1))) 
+           panic("set_gdt failed");
+       
        lgdt_finish();
        gdt_set = 1;
 
index a9e536f41ce340537a1c6564ca51402125efcc8c..79b052a34c44d297aa6b375be1e5d813eb91d573 100644 (file)
@@ -273,6 +273,7 @@ static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva);
 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
 static int pmap_remove_entry(struct pmap *pmap, vm_page_t m,
                                        vm_offset_t va);
+static void pmap_copy_ma(vm_paddr_t src, vm_paddr_t dst);
 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
 
 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
@@ -300,6 +301,32 @@ static void pmap_mark_unprivileged(unsigned long pa);
 static void pmap_dec_ref_page(vm_page_t m);
 int pmap_pid_dump(int pid);
 #endif
+
+void 
+pd_set(struct pmap *pmap, vm_paddr_t *ptr, vm_paddr_t val, int type)
+{
+       vm_paddr_t shadow_pdir_ma = pmap->pm_pdir[PTDPTDI] & ~0xFFF;
+       vm_paddr_t shadow_offset = (vm_paddr_t)(ptr - pmap->pm_pdir)*sizeof(vm_paddr_t);
+       
+       switch (type) {
+       case SH_PD_SET_VA:
+               xen_queue_pt_update(shadow_pdir_ma + shadow_offset, 
+                                   xpmap_ptom(val & ~(PG_RW|PG_M)));
+               xen_queue_pt_update(vtomach(ptr),
+                                   xpmap_ptom(val));   
+               break;
+       case SH_PD_SET_VA_MA:
+               xen_queue_pt_update(shadow_pdir_ma + shadow_offset, 
+                                   val & ~(PG_RW|PG_M));
+               xen_queue_pt_update(vtomach(ptr), val);         
+               break;
+       case SH_PD_SET_VA_CLEAR:
+               xen_queue_pt_update(shadow_pdir_ma + shadow_offset, 0);
+               xen_queue_pt_update(vtomach(ptr), 0);   
+               break;
+       }
+}
+
 /*
  * Move the kernel virtual free pointer to the next
  * 4MB.  This is used to help improve performance
@@ -335,7 +362,6 @@ pmap_bootstrap(firstaddr, loadaddr)
 {
        vm_offset_t va;
        pt_entry_t *pte, *unused;
-       int i;
 
        /*
         * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
@@ -416,8 +442,6 @@ pmap_bootstrap(firstaddr, loadaddr)
        PT_CLEAR_VA(CMAP1, FALSE);
        PT_CLEAR_VA(CMAP2, FALSE);
 
-       for (i = 0; i < NKPT; i++)
-               PD_CLEAR_VA(&PTD[i], FALSE);
        PT_UPDATES_FLUSH();
 #ifdef XEN_UNNEEDED
        /* Turn on PG_G on kernel page(s) */
@@ -767,7 +791,7 @@ pmap_invalidate_all(pmap_t pmap)
 static __inline int
 pmap_is_current(pmap_t pmap)
 {
-
+    /* XXX validate */
        return (pmap == kernel_pmap ||
                (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
            (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
@@ -794,7 +818,7 @@ pmap_pte(pmap_t pmap, vm_offset_t va)
                newpf = PT_GET(pde) & PG_FRAME;
                tmppf = PT_GET(PMAP2) & PG_FRAME;
                if (tmppf != newpf) {
-                       PD_SET_VA(PMAP2, newpf | PG_V | PG_A, FALSE);
+                       PT_SET_VA(PMAP2, newpf | PG_V | PG_A, FALSE);
                        pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
                }
                return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
@@ -853,7 +877,7 @@ pmap_pte_quick(pmap_t pmap, vm_offset_t va)
                newpf = PT_GET(pde) & PG_FRAME;
                tmppf = PT_GET(PMAP1) & PG_FRAME;
                if (tmppf != newpf) {
-                       PD_SET_VA(PMAP1, newpf | PG_V | PG_A, TRUE);
+                       PT_SET_VA(PMAP1, newpf | PG_V | PG_A, TRUE);
 #ifdef SMP
                        PMAP1cpu = PCPU_GET(cpuid);
 #endif
@@ -1088,7 +1112,7 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
         * unmap the page table page
         */
        xen_pt_unpin(pmap->pm_pdir[m->pindex]);
-       PD_CLEAR_VA(&pmap->pm_pdir[m->pindex], TRUE);
+       PD_CLEAR_VA(pmap, &pmap->pm_pdir[m->pindex], TRUE);
        --pmap->pm_stats.resident_count;
 
        /*
@@ -1146,8 +1170,8 @@ pmap_pinit0(pmap)
 void
 pmap_pinit(struct pmap *pmap)
 {
-       vm_page_t m, ptdpg[NPGPTD];
-       vm_paddr_t ma;
+       vm_page_t m, ptdpg[NPGPTD*2];
+       vm_paddr_t ma, ma_shadow;
        static int color;
        int i;
 
@@ -1173,7 +1197,7 @@ pmap_pinit(struct pmap *pmap)
        /*
         * allocate the page directory page(s)
         */
-       for (i = 0; i < NPGPTD;) {
+       for (i = 0; i < NPGPTD*2;) {
                m = vm_page_alloc(NULL, color++,
                    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
                    VM_ALLOC_ZERO);
@@ -1184,36 +1208,51 @@ pmap_pinit(struct pmap *pmap)
                        ptdpg[i++] = m;
                }
        }
+#ifdef PAE
+       #error "missing shadow handling for PAE"
+#endif
 
        pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
 
-       for (i = 0; i < NPGPTD; i++) {
-               if ((ptdpg[i]->flags & PG_ZERO) == 0)
-                       bzero(pmap->pm_pdir + (i * NPDEPG), PAGE_SIZE);
-       }
-
        mtx_lock_spin(&allpmaps_lock);
        LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
        mtx_unlock_spin(&allpmaps_lock);
        /* Wire in kernel global address entries. */
        /* XXX copies current process, does not fill in MPPTDI */
-       bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
+       bcopy(kernel_pmap->pm_pdir + KPTDI, pmap->pm_pdir + KPTDI, 
+             nkpt * sizeof(pd_entry_t));
+       /* XXX need to copy global address entries to page directory's L1 shadow */
+       ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[NPGPTD]));
+       /* L1 pin shadow page director{y,ies} */
+       for (i = 0; i < NPGPTD; i++) {
+               ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[NPGPTD + i]));
+               pmap_copy_ma(kernel_pmap->pm_pdir[PTDPTDI + i] & ~(PG_RW|PG_M), ma);
+               xen_pt_pin(ma);
+       }
+
 #ifdef SMP
-       pmap->pm_pdir[MPPTDI] = PTD[MPPTDI];
+       pmap->pm_pdir[MPPTDI] = kernel_pmap->pm_pdir[MPPTDI];
 #endif
 
-       /* install self-referential address mapping entry(s) */
+       /* pin and install L1 shadow */
        for (i = 0; i < NPGPTD; i++) {
                ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i]));
-               pmap->pm_pdir[PTDPTDI + i] = ma | PG_V | PG_A | PG_M;
+               ma_shadow = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[NPGPTD+i]));
+               /* re-map page directory read-only and pin */
+               PT_SET_MA(pmap->pm_pdir + i*PAGE_SIZE, ma | PG_V | PG_A);
+               xen_pgd_pin(ma);
+               /* add L1 shadow of L2 */
+               xen_queue_pt_update(vtomach(&pmap->pm_pdir[PTDPTDI + i]), 
+                                   ma_shadow | PG_V | PG_A); 
+               xen_queue_pt_update(ma_shadow + PTDPTDI*sizeof(vm_paddr_t), 
+                                   vtomach(pmap->pm_pdir) | PG_V | PG_A);
+
 #ifdef PAE
+               #error "unsupported currently"
                pmap->pm_pdpt[i] = ma | PG_V;
 #endif
-               /* re-map page directory read-only */
-               PT_SET_MA(pmap->pm_pdir, ma | PG_V | PG_A);
-               xen_pgd_pin(ma);
-
        }
+       xen_flush_queue();
 
        pmap->pm_active = 0;
        TAILQ_INIT(&pmap->pm_pvlist);
@@ -1265,8 +1304,8 @@ _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
 
        ptepa = VM_PAGE_TO_PHYS(m);
        xen_pt_pin(xpmap_ptom(ptepa));
-       PD_SET_VA(&pmap->pm_pdir[ptepindex], 
-               (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE);
+       PD_SET_VA(pmap, &pmap->pm_pdir[ptepindex], 
+               (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE);
 
        return m;
 }
@@ -1423,7 +1462,7 @@ pmap_lazyfix(pmap_t pmap)
 void
 pmap_release(pmap_t pmap)
 {
-       vm_page_t m, ptdpg[NPGPTD];
+       vm_page_t m, ptdpg[NPGPTD + 1];
        vm_paddr_t ma;
        int i;
 
@@ -1436,25 +1475,27 @@ pmap_release(pmap_t pmap)
        LIST_REMOVE(pmap, pm_list);
        mtx_unlock_spin(&allpmaps_lock);
 
-       for (i = 0; i < NPGPTD; i++)
+       for (i = 0; i < NPGPTD; i++) {
                ptdpg[i] = PHYS_TO_VM_PAGE(PT_GET(&pmap->pm_pdir[PTDPTDI + i]));
-
-       for (i = 0; i < nkpt + NPGPTD; i++)
-               PD_CLEAR_VA(&pmap->pm_pdir[PTDPTDI + i], FALSE);
+       }
+       ptdpg[NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir));
+       for (i = 0; i < nkpt + NPGPTD; i++) 
+               PD_CLEAR_VA(pmap, &pmap->pm_pdir[PTDPTDI + i], FALSE);
 
        bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
            sizeof(*pmap->pm_pdir));
 #ifdef SMP
-       PD_CLEAR_VA(&pmap->pm_pdir[MPPTDI], FALSE);
+       PD_CLEAR_VA(pmap, &pmap->pm_pdir[MPPTDI], FALSE);
 #endif
        PT_UPDATES_FLUSH();
        pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
 
        vm_page_lock_queues();
-       for (i = 0; i < NPGPTD; i++) {
+       for (i = 0; i < NPGPTD + 1; i++) {
                m = ptdpg[i];
                
                ma = xpmap_ptom(VM_PAGE_TO_PHYS(m));
+               /* unpinning L1 and L2 treated the same */
                 xen_pgd_unpin(ma);
 #ifdef PAE
                KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
@@ -1530,12 +1571,12 @@ pmap_growkernel(vm_offset_t addr)
                pmap_zero_page(nkpg);
                ptppaddr = VM_PAGE_TO_PHYS(nkpg);
                newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
-               PD_SET_VA(&pdir_pde(PTD, kernel_vm_end), newpdir, TRUE);
+               PD_SET_VA(kernel_pmap, &pdir_pde(kernel_pmap->pm_pdir, kernel_vm_end), newpdir, TRUE);
 
                mtx_lock_spin(&allpmaps_lock);
                LIST_FOREACH(pmap, &allpmaps, pm_list) {
                        pde = pmap_pde(pmap, kernel_vm_end);
-                       PD_SET_VA(pde, newpdir, FALSE);
+                       PD_SET_VA(pmap, pde, newpdir, FALSE);
                }
                PT_UPDATES_FLUSH();
                mtx_unlock_spin(&allpmaps_lock);
@@ -1752,7 +1793,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
                 * Check for large page.
                 */
                if ((ptpaddr & PG_PS) != 0) {
-                       PD_CLEAR_VA(pmap->pm_pdir[pdirindex], TRUE);
+                       PD_CLEAR_VA(pmap, &pmap->pm_pdir[pdirindex], TRUE);
                        pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
                        anyvalid = 1;
                        continue;
@@ -2326,7 +2367,7 @@ retry:
                pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
                npdes = size >> PDRSHIFT;
                for(i = 0; i < npdes; i++) {
-                       PD_SET_VA(&pmap->pm_pdir[ptepindex],
+                       PD_SET_VA(pmap, &pmap->pm_pdir[ptepindex],
                            ptepa | PG_U | PG_RW | PG_V | PG_PS, FALSE);
                        ptepa += NBPDR;
                        ptepindex += 1;
@@ -2458,7 +2499,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
                        
                if (srcptepaddr & PG_PS) {
                        if (dst_pmap->pm_pdir[ptepindex] == 0) {
-                               PT_SET_VA(&dst_pmap->pm_pdir[ptepindex], srcptepaddr, TRUE);
+                               PD_SET_VA(dst_pmap, &dst_pmap->pm_pdir[ptepindex], srcptepaddr, TRUE);
                                dst_pmap->pm_stats.resident_count +=
                                    NBPDR / PAGE_SIZE;
                        }
@@ -2629,6 +2670,32 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
        mtx_unlock(&CMAPCADDR12_lock);
 }
 
+void
+pmap_copy_ma(vm_paddr_t src, vm_paddr_t dst)
+{
+
+       mtx_lock(&CMAPCADDR12_lock);
+       if (*CMAP1)
+               panic("pmap_copy_ma: CMAP1 busy");
+       if (*CMAP2)
+               panic("pmap_copy_ma: CMAP2 busy");
+       sched_pin();
+#ifdef I386_CPU
+       invltlb();
+#else
+       invlpg((u_int)CADDR1);
+       invlpg((u_int)CADDR2);
+#endif
+       PT_SET_VA_MA(CMAP1, PG_V | src | PG_A, FALSE);
+       PT_SET_VA_MA(CMAP2, PG_V | PG_RW | dst | PG_A | PG_M, TRUE);
+
+       bcopy(CADDR1, CADDR2, PAGE_SIZE);
+       PT_CLEAR_VA(CMAP1, FALSE);
+       PT_CLEAR_VA(CMAP2, TRUE);
+       sched_unpin();
+       mtx_unlock(&CMAPCADDR12_lock);
+}
+
 /*
  * Returns true if the pmap's pv is one of the first
  * 16 pvs linked to from this page.  This count may
@@ -2807,7 +2874,11 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
        boolean_t rv;
 
        rv = FALSE;
-
+       /* XXX 
+        * in order for writable pagetables to help, 
+        * this has to work - check if we aren't doing 
+        * an invlpg on the page tables linear mappings
+        */  
        return (rv);
        PMAP_LOCK(pmap);
        if (pmap_pde(pmap, addr)) {
@@ -3396,3 +3467,9 @@ pmap_pvdump(pa)
        printf(" ");
 }
 #endif
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
index b16edb6534435fc0a47f6caf854cbf4be3f3e345..85794f2c2e18d07f87e77e2fad45527ea0c6f0a9 100644 (file)
@@ -474,7 +474,7 @@ xen_machphys_update(unsigned long mfn, unsigned long pfn)
 }
 
 void
-xen_queue_pt_update(pt_entry_t *ptr, pt_entry_t val)
+xen_queue_pt_update(vm_paddr_t ptr, vm_paddr_t val)
 {
     SET_VCPU();
     
index c93727d90428517521653c35210898cb8a039ad7..b23221bdbd4763cd37426eb0910460c2b4e53cf8 100644 (file)
@@ -88,11 +88,13 @@ static inline int
 HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
 {
     int ret;
+    unsigned long ign1, ign2;
+
     __asm__ __volatile__ (
         TRAP_INSTR
-        : "=a" (ret) : "0" (__HYPERVISOR_set_gdt), 
-        "b" (frame_list), "c" (entries) : "memory" );
-
+        : "=a" (ret), "=b" (ign1), "=c" (ign2)
+        : "0" (__HYPERVISOR_set_gdt), "1" (frame_list), "2" (entries)
+        : "memory" );
 
     return ret;
 }
index c0fa939241ce46e61f4d41006beb50645948b574..b25b0b8fe6626ce22c5ab64bf416a3eb2d711679 100644 (file)
@@ -35,7 +35,7 @@
 #define _XEN_XENPMAP_H_
 #include <machine/xenvar.h>
 void xen_invlpg(vm_offset_t);
-void xen_queue_pt_update(pt_entry_t *, pt_entry_t);
+void xen_queue_pt_update(vm_paddr_t, vm_paddr_t);
 void xen_pt_switch(uint32_t);
 void xen_set_ldt(unsigned long, unsigned long);
 void xen_tlb_flush(void);
@@ -59,8 +59,8 @@ void pmap_ref(pt_entry_t *pte, unsigned long ma);
 #define PMAP_DEC_REF_PAGE(a)
 #endif
 
+#define WRITABLE_PAGETABLES
 #define ALWAYS_SYNC 0
-#define PT_DEBUG
 
 #ifdef PT_DEBUG
 #define PT_LOG() printk("WP PT_SET %s:%d\n", __FILE__, __LINE__) 
@@ -70,6 +70,13 @@ void pmap_ref(pt_entry_t *pte, unsigned long ma);
 
 #define pmap_valid_entry(E)           ((E) & PG_V) /* is PDE or PTE valid? */
 
+#define SH_PD_SET_VA        1
+#define SH_PD_SET_VA_MA     2
+#define SH_PD_SET_VA_CLEAR  3
+
+struct pmap;
+void pd_set(struct pmap *pmap, vm_paddr_t *ptr, vm_paddr_t val, int type);
+
 #define        PT_GET(_ptp)                                            \
        (pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : *(_ptp))
 
@@ -90,20 +97,19 @@ void pmap_ref(pt_entry_t *pte, unsigned long ma);
         *(_ptp) = 0;                                            \
 } while (/*CONSTCOND*/0)
 
-#define PD_SET_VA(_ptp,_npte,sync) do {                                \
+#define PD_SET_VA(_pmap, _ptp, _npte, sync) do {                       \
         PMAP_REF((_ptp), xpmap_ptom(_npte));                    \
-       xen_queue_pt_update((pt_entry_t *)vtomach((_ptp)),      \
-                           xpmap_ptom((_npte)));               \
+        pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA);           \
        if (sync || ALWAYS_SYNC) xen_flush_queue();             \
 } while (/*CONSTCOND*/0)
-#define PD_SET_VA_MA(_ptp,_npte,sync) do {                     \
+#define PD_SET_VA_MA(_pmap, _ptp, _npte, sync) do {            \
         PMAP_REF((_ptp), (_npte));                              \
-       xen_queue_pt_update((pt_entry_t *)vtomach((_ptp)), (_npte)); \
+        pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA_MA);        \
        if (sync || ALWAYS_SYNC) xen_flush_queue();             \
 } while (/*CONSTCOND*/0)
-#define PD_CLEAR_VA(_ptp, sync) do {                           \
+#define PD_CLEAR_VA(_pmap, _ptp, sync) do {                    \
         PMAP_REF((pt_entry_t *)(_ptp), 0);                      \
-       xen_queue_pt_update((pt_entry_t *)vtomach(_ptp), 0);    \
+        pd_set((_pmap),(_ptp), 0, SH_PD_SET_VA_CLEAR);     \
        if (sync || ALWAYS_SYNC) xen_flush_queue();             \
 } while (/*CONSTCOND*/0)
 
@@ -128,9 +134,21 @@ void pmap_ref(pt_entry_t *pte, unsigned long ma);
                xen_flush_queue();                              \
 } while (/*CONSTCOND*/0)
 
-#define PD_SET_VA    PT_SET_VA
-#define PD_SET_VA_MA PT_SET_VA_MA
-#define PD_CLEAR_VA  PT_CLEAR_VA
+#define PD_SET_VA(_pmap, _ptp,_npte,sync) do {                 \
+        PMAP_REF((_ptp), xpmap_ptom(_npte));                    \
+        pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA);           \
+       if (sync || ALWAYS_SYNC) xen_flush_queue();             \
+} while (/*CONSTCOND*/0)
+#define PD_SET_VA_MA(_pmap, _ptp,_npte,sync) do {              \
+        PMAP_REF((_ptp), (_npte));                              \
+        pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA_MA);        \
+       if (sync || ALWAYS_SYNC) xen_flush_queue();             \
+} while (/*CONSTCOND*/0)
+#define PD_CLEAR_VA(_pmap, _ptp, sync) do {                    \
+        PMAP_REF((pt_entry_t *)(_ptp), 0);                      \
+        pd_set((_pmap),(_ptp), 0, SH_PD_SET_VA_CLEAR);          \
+       if (sync || ALWAYS_SYNC) xen_flush_queue();             \
+} while (/*CONSTCOND*/0)
 
 #endif